Advanced Lane Finding Project

The goals / steps of this project are the following:

  • Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
  • Apply a distortion correction to raw images.
  • Use color transforms, gradients, etc., to create a thresholded binary image.
  • Apply a perspective transform to rectify binary image ("birds-eye view").
  • Detect lane pixels and fit to find the lane boundary.
  • Determine the curvature of the lane and vehicle position with respect to center.
  • Warp the detected lane boundaries back onto the original image.
  • Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.

Preperation and utilities

In [1]:
FLG_DEBUG = False #True
In [2]:
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
In [3]:
def show_images(images):

    num_img = len(images)
    #print(num_img)
    #plt.figure(figsize=(15,5))
    #fig, axes = plt.subplots(num_img, 1)
    #for axe, i in zip(axes.flat, range(num_img)):
    #    print(i)
    #    axe.imshow(images[i])
    #    #plt.imshow(images[i])
    for i in range(num_img):
        #print("i:",i)
        plt.figure(i)
        #plt.subplot(num_img, 1, i+1)
        
        plt.imshow(images[i])
        #plt.imshow(images[i])

#plt.show()
In [4]:
def show_two_images(img1, img2, title1="Original Image", title2="Result Image"):

    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()
    ax1.imshow(img1)
    ax1.set_title(title1, fontsize=50)
    ax2.imshow(img2)
    ax2.set_title(title2, fontsize=50)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
In [5]:
def show_pair_images(images1, images2):
    for i in range(len(images1)):
        show_two_images(images1[i], imges2[i])

Camera calibration

First, I'll compute the camera calibration using chessboard images

In [6]:
# a function that takes an image, object points, and image points
# performs the camera calibration, image distortion correction and 
# returns the undistorted image
def cal_undistort(img, objpoints, imgpoints):
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    img_dst = cv2.undistort(img, mtx, dist, None, mtx)
    return img_dst
In [7]:
def compute_camera_calibration(images,chessboard_size_x=9, chessboard_size_y=6):
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((chessboard_size_y*chessboard_size_x,3), np.float32)
    objp[:,:2] = np.mgrid[0:chessboard_size_x,0:chessboard_size_y].T.reshape(-1,2)

    # Arrays to store object points and image points from all the images.
    objpoints = [] # 3d points in real world space
    imgpoints = [] # 2d points in image plane.

    # Make a list of calibration images
    images_converted = []

    # Step through the list and search for chessboard corners
    for fname in images:
        #print(fname)
        img = cv2.imread(fname)
        img_converted = np.copy(img)
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (chessboard_size_x,chessboard_size_y),None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            img_converted = cv2.drawChessboardCorners(img_converted, (chessboard_size_x,chessboard_size_y), corners, ret)
            images_converted.append(img_converted)
            
            # Undistort the original image using objpoints and imgpoints
            img_undistorted = cal_undistort(img, objpoints, imgpoints)
            show_two_images(img_converted, img_undistorted)

    return objpoints, imgpoints
In [8]:
camera_images = glob.glob('./camera_cal/calibration*.jpg')
objpoints, imgpoints = compute_camera_calibration(camera_images)

Correcting for Distortion

use chessboard images to obtain image points and object points, and then compute the calibration and undistortion.

In [9]:
test_img = cv2.imread('./camera_cal/calibration2.jpg')

test_img_undistorted = cal_undistort(test_img, objpoints, imgpoints)
show_two_images(test_img, test_img_undistorted, title2="Undistorted Image")
In [10]:
test_image = mpimg.imread('./test_images/straight_lines1.jpg')
test_image_undistorted = cal_undistort(test_image, objpoints, imgpoints)
show_two_images(test_image, test_image_undistorted, title2="Undistorted Image")

Magnitude of the Gradient

Color and Gradient

In [11]:
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg


# Edit this function to create your own pipeline.
def pipeline_thresh(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
    img = np.copy(img)
    # Convert to HLS color space and separate the V channel
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float)
    l_channel = hls[:,:,1]
    s_channel = hls[:,:,2]
    
    # Grayscale image
    # NOTE: we already saw that standard grayscaling lost color information for the lane lines
    # Explore gradients in other colors spaces / color channels to see what might work better
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    # Sobel x
    #sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x
    
    abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    
    # Threshold color channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Stack each channel
    # Note color_binary[:, :, 0] is all 0s, effectively an all black image. It might
    # be beneficial to replace this channel with something else.
    color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary)) * 255
    
    # Combine the two binary thresholds
    combined_binary = np.zeros_like(sxbinary)
    combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1

    return combined_binary
In [12]:
#image = mpimg.imread('./test_images/test5.jpg')
test_image = mpimg.imread('./test_images/straight_lines1.jpg')

test_image_threshed = pipeline_thresh(test_image)

show_two_images(test_image, test_image_threshed, title2="Thresh Pipeline Result")

I checked the pipeline using the following image from the video, too.

In [13]:
test_img = mpimg.imread('./issue_images/org_video_image_20171217_10_22_06.jpg')
test_output = pipeline_thresh(test_img, s_thresh=(170, 255), sx_thresh=(5, 100))
show_two_images(test_img, test_output, title2="Magnitude Image")

Warp

In [14]:
Y_TOP = 435#440
Y_BTM = 668

X_CTR = 640#640 = 1280/2

X_TOP_WDT = 48#45#50#55#65#50#50#60#60
X_BTM_WDT = 1000#1280#1000#1000#900#850

# less space is used
Y_TOP = 450#435#440
X_TOP_WDT = 120#48#45#50#55#65#50#50#60#60


TOP_LEFT = (X_CTR - X_TOP_WDT/2 , Y_TOP)
TOP_RIGHT = (X_CTR + X_TOP_WDT/2, Y_TOP)
BTM_LEFT = (X_CTR - X_BTM_WDT/2 , Y_BTM)
BTM_RIGHT = (X_CTR + X_BTM_WDT/2, Y_BTM)

SRC_TRAPEZOID_ORG_ORDR = [TOP_LEFT, TOP_RIGHT, BTM_RIGHT, BTM_LEFT]
#SRC_TRAPEZOID_WRP = [TOP_LEFT, TOP_RIGHT, BTM_LEFT, BTM_RIGHT]
In [15]:
def pipeline_warp(image, offsetX = 50, offsetY = 0):
    offset = 100 # offset for dst points
    img_size = (image.shape[1], image.shape[0])
    w = img_size[1]
    h = img_size[1]

    offsetX = 30 # test
    
    src = np.float32(SRC_TRAPEZOID_ORG_ORDR)

    dst = np.float32([[offsetX, offsetY], [w - offsetX, offsetY], 
                      [w - offsetX, h - offsetY],
                      [offsetX, h - offsetY]])
        
    M = cv2.getPerspectiveTransform(src, dst)
    
    warped = cv2.warpPerspective(image,M,(h, w))
    
    Minv = cv2.getPerspectiveTransform(dst, src)

    return warped, M, Minv
In [16]:
test_img = mpimg.imread('./test_images/test1.jpg')
test_image_threshed = pipeline_thresh(test_img)
test_image_warped, M, Minv = pipeline_warp(test_image_threshed)
show_two_images(test_img, test_image_threshed, title2="Binary Image")
show_two_images(test_img, test_image_warped, title2="Warped Image")

Finding the Lines

Class definition

In [17]:
#SPEC_CAMERA_IMG_HEIGHT = 720?
#SPEC_CAMERA_IMG_WIDTH = 1300?
SPEC_WARP_IMG_HEIGHT = 720
SPEC_WARP_IMG_WIDTH = 720
SPEC_WARP_IMG_X_MID = 360
#U.S. regulations that require a minimum lane width of 12 feet or 3.7 meters
LANE_WIDTH_IN_METER = 3.7
OBSERVED_LANE_PIX_IN_WARP_IMG = 500
OBSERVED_METER_PER_PIXEL = LANE_WIDTH_IN_METER/OBSERVED_LANE_PIX_IN_WARP_IMG
In [18]:
def calc_vehicle_position(left_fit, right_fit, image):
    #y_btm = 720#
    y_btm = image.shape[0] # Y
    #print("image y", image.shape[0])
    #print("image x", image.shape[1])
    #image_mid = 720//2
    image_mid = image.shape[1]/2 # X
    #print("image_mid", image_mid)
    return find_vehicle_position(left_fit, right_fit, y_btm, image_mid)

def find_vehicle_position(left_fit, right_fit, y_btm=SPEC_WARP_IMG_HEIGHT, image_mid=SPEC_WARP_IMG_X_MID):

    x_left = left_fit[0]*y_btm**2 + left_fit[1]*y_btm + left_fit[2]
    x_right = right_fit[0]*y_btm**2 + right_fit[1]*y_btm + right_fit[2]
    
    #print("x_left", x_left)
    #print("x_right", x_right)
    
    lane_width = x_right - x_left

    x_mid = x_left + (lane_width)//2
    
    #print("x_mid", x_mid)
    
    vehicle_position = image_mid - x_mid

    vehicle_position_meter = vehicle_position*OBSERVED_METER_PER_PIXEL
    
    return vehicle_position_meter#vehicle_position
In [19]:
def calc_radius_of_curvature(fit_cr, y_eval=SPEC_WARP_IMG_HEIGHT-1):
    # Define conversions in x and y from pixels space to meters
    #ym_per_pix = 30/720 # meters per pixel in y dimension
    #xm_per_pix = 3.7/700 # meters per pixel in x dimension
    
    #ym_per_pix = 30/720 # meters per pixel in y dimension
    xm_per_pix = LANE_WIDTH_IN_METER/OBSERVED_LANE_PIX_IN_WARP_IMG # meters per pixel in x dimension
    ym_per_pix = xm_per_pix # warped image has the same y pix as x pix
    
    # Calculate the new radii of curvature
    if fit_cr[0] == 0:
        radius = 0
    else:
        radius = ((1 + (2*fit_cr[0]*y_eval*ym_per_pix + fit_cr[1])**2)**1.5) / np.absolute(2*fit_cr[0])
    # Now our radius of curvature is in meters
    return radius
In [20]:
# Define a class to receive the characteristics of each line detection
class Line():

    def __init__(self, name=""):
        self.__LAST_N_ITERATIONS = 5

        self.__FIT_OUTLIER_0 = 1
        self.__FIT_OUTLIER_1 = 10
        self.__FIT_OUTLIER_2 = 200
        
        self.name = name
        # was the line detected in the last iteration?
        self.detected = False  
        # x values of the last n fits of the line
        self.recent_xfitted = [] 
        
        #average x values of the fitted line over the last n iterations
        self.bestx = None     
        #polynomial coefficients averaged over the last n iterations
        self.best_fit = None 
        
        #polynomial coefficients for the most recent fit
        self.current_fit = np.array([0,0,0], dtype='float')  # Not sure why [np.array([False])]  
        
        self.prev_fit = np.array([0,0,0], dtype='float') 
        self.outlier_fit = np.array([0,0,0], dtype='float') 
        
        #radius of curvature of the line in some units
        self.radius_of_curvature = None 
        #distance in meters of vehicle center from the line
        self.line_base_pos = None 
        
        #difference in fit coefficients between last and new fits
        self.diffs = np.array([0,0,0], dtype='float') 
        
        #x values for detected line pixels
        self.allx = None  
        #y values for detected line pixels
        self.ally = None
        
    def calc_average_x_values_of_fitted_line(self):
        #average x values of the fitted line over the last n iterations
        if len(self.recent_xfitted) > self.__LAST_N_ITERATIONS:
            self.recent_xfitted = self.recent_xfitted[:self.__LAST_N_ITERATIONS-1]
        self.bestx = np.array(self.recent_xfitted).mean(axis=0).tolist()  

    def update_averaged_polynomial_coefficients(self):
        #polynomial coefficients averaged over the last n iterations
        if FLG_DEBUG == True:
            print("name", self.name)
            print("detected", self.detected)
            print("diffs", self.diffs)
            print("current_fit", self.current_fit)
            print("bestx", self.bestx)
            print("ally", self.ally)
        self.best_fit = np.polyfit(self.ally,self.bestx, 2)
        #p = np.poly1d(self.current_fit)
        #self.best_fit = np.polyfit(self.ally,p(self.ally), 2)
        #z2 = np.polyfit(xp, p(xp), 3)
        #recalc_best_fit = np.polyfit(ploty,left_x_values, 2)
    
    def calc_radius_of_curvature(self):
        self.radius_of_curvature = calc_radius_of_curvature(self.current_fit)

    def calc_diff(self, prev_fit, current_fit):
        self.diffs[0] = abs(current_fit[0] - prev_fit[0])
        self.diffs[1] = abs(current_fit[1] - prev_fit[1])
        self.diffs[2] = abs(current_fit[2] - prev_fit[2])

    def add_recent_data(self, cur_fit, x_values):
        if FLG_DEBUG == True:
            print("current_fit", self.current_fit.shape)
            print("prev_fit", self.prev_fit.shape)
        # was the line detected in the last iteration?
        #self.detected = True  

        # x values of the last n fits of the line
        #self.recent_xfitted.append(current_fit)# = [] #?
        
        #x values for detected line pixels
        self.allx = x_values
        #y values for detected line pixels
        #self.ally = nonzeroy

        #polynomial coefficients for the most recent fit
        #self.prev_fit = self.current_fit
        #self.current_fit = current_fit 
    
        self.calc_diff(self.current_fit, cur_fit)
        #self.detected = True
        #if self.diffs[0] > self.__FIT_OUTLIER_0 or self.diffs[1] > self.__FIT_OUTLIER_1 or self.diffs[2] > self.__FIT_OUTLIER_2:
        #    self.detected = False
        #if self.detected == False:
        #    self.outlier_fit = current_fitx
        #else:
        #    self.prev_fit = self.current_fit
        #    self.current_fit = current_fitx
        #self.adjust()
        
        self.current_fit = cur_fit
        self.check()
        if self.detected == True:
            self.recent_xfitted.append(x_values)
        else:
            self.outlier_fit = self.current_fit
            
        self.calc_radius_of_curvature()

        self.calc_average_x_values_of_fitted_line()
        self.update_averaged_polynomial_coefficients()
        
    def select_fit(self):
        return self.best_fit
        #if self.detected == True:
        #    return self.current_fit
        #else:
        #    return self.best_fit
            
    #def adjust(self):
    #    self.outlier_fit = self.current_fit
    #    self.current_fit = self.prev_fit
    
    def check(self):
        self.detected = True
        if len(self.recent_xfitted) != 0:
            if self.diffs[0] > self.__FIT_OUTLIER_0 or self.diffs[1] > self.__FIT_OUTLIER_1 or self.diffs[2] > self.__FIT_OUTLIER_2:
                self.detected = False
        #if self.detected == False:
        #    self.adjust()
        #print("Detect False Cond1",(self.diffs[0] > self.__FIT_OUTLIER_0))
        #print("Detect False Cond2",(self.diffs[1] > self.__FIT_OUTLIER_1))
        #print("Detect False Cond3",(self.diffs[2] > self.__FIT_OUTLIER_2))

    def text_diffs(self):
        return "diff:{:+.4f} {:+.4f} {:+.4f}".format(self.diffs[0],self.diffs[1],self.diffs[2])
    def text_fits(self): 
        return "fits:{:+.4f} {:+.4f} {:+.4f}".format(self.current_fit[0],self.current_fit[1],self.current_fit[2])

    def print_line_info(self):
        if self.name != None:
            print(self.name)
        print(self.text_diffs())
        print(self.text_fits())
        if FLG_DEBUG == True:
            print("diffs", self.diffs.shape)
            print("fits", self.fits.shape)
In [21]:
class Lane():
    def __init__(self):
        self.right_line = Line("R")  
        self.left_line = Line("L")
        #distance in meters of vehicle center from the line
        self.line_base_pos = None

    def calc_radius_of_curvature(self):
        #print(lane.left_line)
        return (self.left_line.radius_of_curvature + self.right_line.radius_of_curvature)/2

    def find_vehicle_position(self):
        self.line_base_pos = find_vehicle_position(self.left_line.current_fit, self.right_line.current_fit)
        return self.line_base_pos
    
    def adjust(self):
        self.right_line.adjust()
        self.left_line.adjust()
    def print_lane_info(self):
        print("[Left Line]")
        self.left_line.print_line_info()
        print("[Right Line]")
        self.right_line.print_line_info()
  
    def text_radius(self): return "Radius:{:.4f} ".format(self.calc_radius_of_curvature())
    def text_position(self): return "Vehicle Position:{:.4f} ".format(self.find_vehicle_position())
    def text_diffs_left(self): return "L diff:{:+.4f} {:+.4f} {:+.4f}".format(self.left_line.diffs[0],self.left_line.diffs[1],self.left_line.diffs[2])
    def text_diffs_right(self): return "R diff:{:+.4f} {:+.4f} {:+.4f}".format(self.right_line.diffs[0],self.right_line.diffs[1],self.right_line.diffs[2])
    def text_fits_left(self): return "L fit:{:+.4f} {:+.4f} {:+.4f}".format(self.left_line.current_fit[0],self.left_line.current_fit[1],self.left_line.current_fit[2])
    def text_fits_right(self): return "R fit:{:+.4f} {:+.4f} {:+.4f}".format(self.right_line.current_fit[0],self.right_line.current_fit[1],self.right_line.current_fit[2])
    def text_outlier_fits_left(self): return "L out:{:+.4f} {:+.4f} {:+.4f}".format(self.left_line.outlier_fit[0],self.left_line.outlier_fit[1],self.left_line.outlier_fit[2])
    def text_outlier_fits_right(self): return "R out:{:+.4f} {:+.4f} {:+.4f}".format(self.right_line.outlier_fit[0],self.right_line.outlier_fit[1],self.right_line.outlier_fit[2])
    def text_best_fit_left(self): return "L best:{:+.4f} {:+.4f} {:+.4f}".format(self.left_line.best_fit[0],self.left_line.best_fit[1],self.left_line.best_fit[2])
    def text_best_fit_right(self): return "R best:{:+.4f} {:+.4f} {:+.4f}".format(self.right_line.best_fit[0],self.right_line.best_fit[1],self.right_line.best_fit[2])

    def text_alert_msg_left(self):
        msg = ""
        if (self.left_line.detected == False):
            msg = msg + self.text_outlier_fits_left()
        return msg
    def text_alert_msg_right(self):
        msg = ""
        if (self.right_line.detected == False):
            msg = msg + self.text_outlier_fits_right()
        return msg
    def text_alert_msg(self):
        msg = ""
        if (self.left_line.detected == False):
            msg = msg + self.text_outlier_fits_left()
        if (self.right_line.detected == False):
            msg = msg + self.text_outlier_fits_right()
        return msg

    def print_lane_info(self):
        print(self.text_radius())
        print(self.text_position())
        print(self.text_diffs_left())
        print(self.text_diffs_right())
        print(self.text_fits_left())
        print(self.text_fits_right())
        print(self.text_best_fit_left())
        print(self.text_best_fit_right())

Function Definition

In [22]:
# Assuming you have created a warped binary image called "binary_warped"
def find_lane(binary_warped, lane = Lane(), viz=False):

    # Choose the number of sliding windows
    nwindows = 9
    # Set the width of the windows +/- margin
    margin = 100
    #margin = 150
    # Set minimum number of pixels found to recenter window
    minpix = 50
    
    # Take a histogram of the bottom half of the image
    #histogram = np.sum(binary_warped[binary_warped.shape[0]/2:,:], axis=0)
    histogram = np.sum(binary_warped[np.int(binary_warped.shape[0]/2):,:], axis=0)

    if (viz == True) :
        # Create an output image to draw on and  visualize the result
        out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255

    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = np.int(histogram.shape[0]/2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    # Set height of windows
    window_height = np.int(binary_warped.shape[0]/nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base

    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []

    # Step through the windows one by one
    right_stop = False
    left_stop = False
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        if (viz == True) :
            # Draw the windows on the visualization image
            cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2) 
            cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2) #(0,0,255), 2) 
        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & 
        (nonzerox >= win_xleft_low) &  (nonzerox < win_xleft_high)).nonzero()[0]
        
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & 
        (nonzerox >= win_xright_low) &  (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        ##if left_stop == False:
        left_lane_inds.append(good_left_inds)
        ##if right_stop == False:
        right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:        
            rightx_current = np.int(np.mean(nonzerox[good_right_inds]))

    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)

    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds] 
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds] 

    # Fit a second order polynomial to each
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    #right_line = Line()
    #left_line = Line()
    
    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
    left_x_values = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_x_values = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]

    if lane.left_line.ally == None:
        lane.left_line.ally = ploty
    if lane.right_line.ally == None:
        lane.right_line.ally = ploty   
    lane.left_line.add_recent_data(left_fit, left_x_values)
    lane.right_line.add_recent_data(right_fit, right_x_values)
    
    #lane.right_line.add_recent_data(right_fit, nonzerox, nonzeroy)
    #lane.left_line.add_recent_data(left_fit, nonzerox, nonzeroy)

    #lane.right_line = right_line
    #lane.left_line = left_line
    
    if (viz == True):
        visualize_lines(binary_warped, left_fit, right_fit, left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img)
        lane.print_lane_info()
    if (FLG_DEBUG == True):
        p = np.poly1d(left_fit)
        #recalc_best_fit = np.polyfit(left_x_values[::-1], ploty[::-1], 2)
        recalc_best_fit = np.polyfit(ploty,left_x_values, 2)
        print("L recalc:{:+.4f} {:+.4f} {:+.4f}".format(recalc_best_fit[0],recalc_best_fit[1],recalc_best_fit[2]))
        #print("left x values", left_x_values[::-1])
        #print("left x data", leftx)
        #print("left y data", lefty)
        #print("ploty data", ploty[::-1])
        

    return lane #left_fit, right_fit, right_line, left_line

Skip the sliding windows step once you know where the lines are

Now I know where the lines are you have a fit! In the next frame of video I don't need to do a blind search again, but instead I can just search in a margin around the previous line position.

In [23]:
# Assume you now have a new warped binary image 
# from the next frame of video (also called "binary_warped")
# It's now much easier to find line pixels!
"""
def find_lane_again(binary_warped, lane, viz=False):
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 100
    
    margin = 50
    #margin = 30
    
    left_fit = lane.left_line.current_fit
    right_fit = lane.right_line.current_fit
    
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + 
    left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + 
    left_fit[1]*nonzeroy + left_fit[2] + margin))) 

    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + 
    right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + 
    right_fit[1]*nonzeroy + right_fit[2] + margin)))  

    # Again, extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds] 
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    
    if (viz == True):
        visualize_lines(binary_warped, left_fit, right_fit, left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img)
        
    return left_fit, right_fit, right_line, left_line
"""
Out[23]:
'\ndef find_lane_again(binary_warped, lane, viz=False):\n    nonzero = binary_warped.nonzero()\n    nonzeroy = np.array(nonzero[0])\n    nonzerox = np.array(nonzero[1])\n    margin = 100\n    \n    margin = 50\n    #margin = 30\n    \n    left_fit = lane.left_line.current_fit\n    right_fit = lane.right_line.current_fit\n    \n    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + \n    left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + \n    left_fit[1]*nonzeroy + left_fit[2] + margin))) \n\n    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + \n    right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + \n    right_fit[1]*nonzeroy + right_fit[2] + margin)))  \n\n    # Again, extract left and right line pixel positions\n    leftx = nonzerox[left_lane_inds]\n    lefty = nonzeroy[left_lane_inds] \n    rightx = nonzerox[right_lane_inds]\n    righty = nonzeroy[right_lane_inds]\n    # Fit a second order polynomial to each\n    left_fit = np.polyfit(lefty, leftx, 2)\n    right_fit = np.polyfit(righty, rightx, 2)\n    # Generate x and y values for plotting\n    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )\n    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]\n    \n    if (viz == True):\n        visualize_lines(binary_warped, left_fit, right_fit, left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img)\n        \n    return left_fit, right_fit, right_line, left_line\n'
In [24]:
def find_lane_again(binary_warped, lane, viz=False): 
    
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 100
    
    #margin = 50
    #margin = 30
    
    #left_fit = lane.left_line.current_fit
    #right_fit = lane.right_line.current_fit
    left_fit = lane.left_line.best_fit
    right_fit = lane.right_line.best_fit
    if FLG_DEBUG == True:
        lane.print_lane_info
        print("left_fit",left_fit)
        print("right_fit",right_fit)
    
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + 
    left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + 
    left_fit[1]*nonzeroy + left_fit[2] + margin))) 

    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + 
    right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + 
    right_fit[1]*nonzeroy + right_fit[2] + margin)))  

    # Again, extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds] 
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
    left_x_values = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_x_values = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    
    lane.left_line.add_recent_data(left_fit, left_x_values)
    lane.right_line.add_recent_data(right_fit, right_x_values)
    
    if (viz == True) :
        # Create an output image to draw on and  visualize the result
        out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
        visualize_lines(binary_warped, left_fit, right_fit, left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img)
        lane.print_lane_info()
        
    return lane#left_fit, right_fit, right_line, left_line
In [25]:
# Calculate the new radii of curvature
"""
def calc_radii_of_curvature(fit_cr, y_eval, ym_per_pix):
    curverad = ((1 + (2*fit_cr[0]*y_eval*ym_per_pix + fit_cr[1])**2)**1.5) / np.absolute(2*fit_cr[0])
    return curverad
"""
#left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
#right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
Out[25]:
'\ndef calc_radii_of_curvature(fit_cr, y_eval, ym_per_pix):\n    curverad = ((1 + (2*fit_cr[0]*y_eval*ym_per_pix + fit_cr[1])**2)**1.5) / np.absolute(2*fit_cr[0])\n    return curverad\n'

Visualization

In [26]:
 def put_lane_info(image, lane):
    cv2.putText(image,lane.text_radius(),(50,100),fontFace = cv2.FONT_ITALIC, fontScale = 3, color = (0,0,255))#255,255,255))
    cv2.putText(image,lane.text_position(),(50,200),fontFace = cv2.FONT_ITALIC, fontScale = 3, color = (0,0,255))#255,255,255))
    cv2.putText(image,lane.text_diffs_left(),(50,250),fontFace = cv2.FONT_ITALIC, fontScale = 1, color = (255,0,0))#255,255,255))
    cv2.putText(image,lane.text_diffs_right(),(50,300),fontFace = cv2.FONT_ITALIC, fontScale = 1, color = (255,0,0))#255,255,255))
    cv2.putText(image,lane.text_fits_left(),(50,350),fontFace = cv2.FONT_ITALIC, fontScale = 1, color = (0,255,0))#255,255,255))
    cv2.putText(image,lane.text_fits_right(),(50,400),fontFace = cv2.FONT_ITALIC, fontScale = 1, color = (0, 255,0))#255,255,255))
    cv2.putText(image,lane.text_best_fit_left(),(50,450),fontFace = cv2.FONT_ITALIC, fontScale = 1, color = (0,0,255))#255,255,255))
    cv2.putText(image,lane.text_best_fit_right(),(50,500),fontFace = cv2.FONT_ITALIC, fontScale = 1, color = (0,0,255))#255,255,255))
    cv2.putText(image,lane.text_alert_msg_left(),(50,550),fontFace = cv2.FONT_ITALIC, fontScale = 1, color = (255,255,255))#255,255,255))
    cv2.putText(image,lane.text_alert_msg_right(),(50,600),fontFace = cv2.FONT_ITALIC, fontScale = 1, color = (255,255,255))#255,255,255))
    return image
In [27]:
def draw_polylines(img):
    #result = cv.rectangle(img, (10,   50), (50,  150), (255, 0, 0), 3, 4)
    #image = cv2.polylines(img, np.float32(SRC_RECTANGLE), True, (0, 0, 255), 5)
    pts = np.array(SRC_TRAPEZOID_ORG_ORDR, np.int32)
    pts = pts.reshape((-1, 1, 2))
    #pts = np.float32(SRC_RECTANGLE)
    copy = img.copy()
    cv2.polylines(copy, [pts],True,(255,0,0), thickness=2)
    return copy
#X_CENTER = 410
#Y_CENTER = 610
#X_BOTTOM = 660
#X_OFFSET = 5
#Y_OFFSET = 400
In [28]:
class LineProjector():
    def __init__(self, Minv):
        self.Minv = Minv
        
    def project_lines(self, original_image, warped, left_fit, right_fit):
        
        ploty = np.linspace(0, warped.shape[0]-1, warped.shape[0] )
        
        #print("ploty", ploty)
        
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]

        # Create an image to draw the lines on
        warp_zero = np.zeros_like(warped).astype(np.uint8)
        color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

        # Recast the x and y points into usable format for cv2.fillPoly()
        pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
        pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
        pts = np.hstack((pts_left, pts_right))

        # Draw the lane onto the warped blank image
        cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))

        # Warp the blank back to original image space using inverse perspective matrix (Minv)
        #newwarp = cv2.warpPerspective(color_warp, self.Minv, (image.shape[1], image.shape[0])) 
        newwarp = cv2.warpPerspective(color_warp, self.Minv, (original_image.shape[1], original_image.shape[0])) 
        
        # Combine the result with the original image
        result = cv2.addWeighted(original_image, 1, newwarp, 0.3, 0)
        
        return result
In [29]:
# Generate x and y values for plotting
def visualize_lines(warped, left_fit, right_fit, left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img):
    
    ploty = np.linspace(0, warped.shape[0]-1, warped.shape[0] )
    
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    
    plt.imshow(out_img)
    
    # Generate x and y values for plotting
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    plt.plot(left_fitx, ploty, color='yellow')
    plt.plot(right_fitx, ploty, color='yellow')
    
    #print(warped.shape[0], warped.shape[1])

    plt.xlim(0, 1280)
    plt.ylim(720, 0)
In [30]:
test_img = mpimg.imread('./test_images/test2.jpg')
image_threshed = pipeline_thresh(test_img)

binary_warped, M, Minv = pipeline_warp(image_threshed)

#left_fit, right_fit,left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img, right_line, left_line = find_lines(binary_warped)

#visualize_lines(binary_warped, left_fit, right_fit, left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img)

test_lane = find_lane(binary_warped, viz=True)

#print("left:", calc_radii_of_curvature(left_fit, y_eval, ym_per_pix))
#print("riht:",calc_radii_of_curvature(right_fit, y_eval, ym_per_pix))
Radius:2130.2359 
Vehicle Position:-0.3622 
L diff:+0.0003 +0.4201 +5.3262
R diff:+0.0004 +0.4976 +485.6865
L fit:-0.0003 +0.4201 +5.3262
R fit:-0.0004 +0.4976 +485.6865
L best:-0.0003 +0.4201 +5.3262
R best:-0.0004 +0.4976 +485.6865
In [31]:
import numpy as np
import matplotlib.pyplot as plt

x = np.array([0.0, 1.0, 2.0, 3.0,  4.0,  5.0])
y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
z = np.polyfit(x, y, 3)
print(z)
p = np.poly1d(z)
p30 = np.poly1d(np.polyfit(x, y, 30))
xp = np.linspace(-2, 6, 100)

plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '*')
plt.ylim(-2,2)
plt.show()

z2 = np.polyfit(xp, p(xp), 3)
print(z2)
[ 0.08703704 -0.81349206  1.69312169 -0.03968254]
C:\Anaconda3\envs\carnd-term1\lib\site-packages\ipykernel_launcher.py:9: RankWarning: Polyfit may be poorly conditioned
  if __name__ == '__main__':
[ 0.08703704 -0.81349206  1.69312169 -0.03968254]
In [32]:
test_img = mpimg.imread('./test_images/test2.jpg')
image_threshed = pipeline_thresh(test_img)

binary_warped, M, Minv = pipeline_warp(image_threshed)

#left_fit, right_fit, right_line, left_line = find_lines_again(binary_warped, left_fit, right_fit, right_line, left_line)
#visualize_lines(binary_warped, left_fit, right_fit, left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img)

test_lane = find_lane_again(binary_warped, test_lane, viz=True)
Radius:2133.1442 
Vehicle Position:-0.3631 
L diff:+0.0000 +0.0004 +0.0312
R diff:+0.0000 +0.0000 +0.0000
L fit:-0.0003 +0.4197 +5.3574
R fit:-0.0004 +0.4976 +485.6865
L best:-0.0003 +0.4199 +5.3418
R best:-0.0004 +0.4976 +485.6865

Processing Each Image

In [33]:
Y_TOP = 450#435#440
Y_BTM = 668

X_CTR = 640#640 = 1280/2

X_TOP_WDT = 120#48#45#50#55#65#50#50#60#60
X_BTM_WDT = 1000#1280#1000#1000#900#850

Y_TOP = 460#435#440
X_TOP_WDT = 150#48#45#50#55#65#50#50#60#60

#X_BTM_WDT = 900

TOP_LEFT = (X_CTR - X_TOP_WDT/2 , Y_TOP)
TOP_RIGHT = (X_CTR + X_TOP_WDT/2, Y_TOP)
BTM_LEFT = (X_CTR - X_BTM_WDT/2 , Y_BTM)
BTM_RIGHT = (X_CTR + X_BTM_WDT/2, Y_BTM)

SRC_TRAPEZOID_ORG_ORDR = [TOP_LEFT, TOP_RIGHT, BTM_RIGHT, BTM_LEFT]
#SRC_TRAPEZOID_WRP = [TOP_LEFT, TOP_RIGHT, BTM_LEFT, BTM_RIGHT]
In [34]:
def show_process_images(image, objpoints, imgpoints, lane=None, sx_thresh=(20, 100)):
    
    #put_text(image, "test")
    #cv2.putText(image,text,(start_x,start_y),font, font_size,color)
    #image = cv2.putText(image,"text",(10,10),fontFace = cv2.FONT_HERSHEY_PLAIN, fontScale = 10, color = (255,255,0))
    
    undistorted = cal_undistort(image, objpoints, imgpoints)
    #show_two_images(image, undistorted)
    thresed = pipeline_thresh(undistorted, sx_thresh=sx_thresh)
    #show_two_images(undistorted, thresed)
    binary_warped, M, Minv = pipeline_warp(thresed)
    undistorted_w_line = draw_polylines(undistorted)

    #show_two_images(undistorted_w_line, binary_warped)
    #left_fit, right_fit,left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img, right_line, left_line = find_lines(binary_warped)
    if lane == None:
        lane = find_lane(binary_warped, viz=True)
    else:
        lane = find_lane_again(binary_warped, lane, viz=True)

    left_fit = lane.left_line.current_fit
    right_fit = lane.right_line.current_fit

    lane.print_lane_info()
    """
    print("left_fit {:.4f}".format(left_fit))
    print("right_fit {:.4f}".format(right_fit))
    
    print("left_fit diffs {:.4f}".format(lane.left_line.diffs))
    print("right_fit diffs {:.4f}".format(lane.left_line.diffs))
    """
    line_projector = LineProjector(Minv)

    #visualize_lines(ploty, left_fit, right_fit, left_lane_inds, right_lane_inds, nonzerox, nonzeroy)

    result = line_projector.project_lines(undistorted_w_line, binary_warped, left_fit, right_fit)
    #result = project_lines(org_image, warped, left_fit, right_fit, ploty, Minv)
    
    #plt.imshow(result)
    show_two_images(thresed, binary_warped, title1="Original Image", title2="Warped Image")
    #empty = None
    
    ploty = np.linspace(0, image.shape[0]-1, image.shape[0] )
    y_eval = np.max(ploty)
    #print("y_eval", y_eval)
    #left_curverad, right_curverad = calc_radius_in_meter(y_eval, left_fit, right_fit)
    #print(left_curverad, right_curverad)
    #print(binary_warped.shape[0], binary_warped.shape[1])
    #vehicle_position = calc_vehicle_position(left_fit, right_fit, binary_warped)
    
    #text_radius = "Radius:{} ".format((left_curverad + right_curverad)/2)
    #text_position = "Vehicle Position:{} ".format(vehicle_position)
    #text_diffs_left = "Diffs R:{} {} {}".format(1,2,3)
    #text_diffs_right = "Diffs L:{} {} {}".format(1,2,3)
    #put_text(result, text)
    #print(text_position)
    #cv2.putText(result,text_radius,(50,100),fontFace = cv2.FONT_ITALIC, fontScale = 3, color = (0,0,255))#255,255,255))
    #cv2.putText(result,text_position,(50,200),fontFace = cv2.FONT_ITALIC, fontScale = 3, color = (0,255,0))#255,255,255))
    #cv2.putText(result,text_diffs_left,(50,300),fontFace = cv2.FONT_ITALIC, fontScale = 3, color = (255,0,0))#255,255,255))
    #cv2.putText(result,text_diffs_right,(50,400),fontFace = cv2.FONT_ITALIC, fontScale = 3, color = (255,0,0))#255,255,255))
    
    #print(lane.left_line.radius_of_curvature)
    
    ##result = put_lane_info(result, lane)
    
    show_two_images(result, result)
        
    #left_fit, right_fit,left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img = find_lines(binary_warped)
    
    #visualize_lines(binary_warped, left_fit, right_fit, left_lane_inds, right_lane_inds, nonzerox, nonzeroy, out_img)

    return lane #left_fit, right_fit
In [35]:
test_img = mpimg.imread('./test_images/straight_lines1.jpg')
#print(test_img.shape[0])
#print(test_img.shape[1])

_ = show_process_images(test_img, objpoints, imgpoints)
C:\Anaconda3\envs\carnd-term1\lib\site-packages\ipykernel_launcher.py:94: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
C:\Anaconda3\envs\carnd-term1\lib\site-packages\ipykernel_launcher.py:96: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
Radius:20154.4282 
Vehicle Position:-0.0802 
L diff:+0.0003 +0.4058 +102.3553
R diff:+0.0004 +0.5474 +144.8708
L fit:+0.0000 +0.0139 +107.7127
R fit:+0.0000 -0.0498 +630.5573
L best:-0.0002 +0.2846 +39.4654
R best:-0.0002 +0.3151 +533.9767
Radius:20154.4282 
Vehicle Position:-0.0802 
L diff:+0.0003 +0.4058 +102.3553
R diff:+0.0004 +0.5474 +144.8708
L fit:+0.0000 +0.0139 +107.7127
R fit:+0.0000 -0.0498 +630.5573
L best:-0.0002 +0.2846 +39.4654
R best:-0.0002 +0.3151 +533.9767
In [36]:
test_img = mpimg.imread('./test_images/test1.jpg')
_ = show_process_images(test_img, objpoints, imgpoints)
C:\Anaconda3\envs\carnd-term1\lib\site-packages\ipykernel_launcher.py:94: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
C:\Anaconda3\envs\carnd-term1\lib\site-packages\ipykernel_launcher.py:96: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
Radius:24614.0728 
Vehicle Position:-0.2113 
L diff:+0.0001 +0.1828 +75.8811
R diff:+0.0000 +0.0933 +100.2311
L fit:+0.0002 -0.1688 +183.5937
R fit:+0.0000 -0.1431 +730.7884
L best:-0.0001 +0.1712 +75.4975
R best:-0.0002 +0.2006 +583.1796
Radius:24614.0728 
Vehicle Position:-0.2113 
L diff:+0.0001 +0.1828 +75.8811
R diff:+0.0000 +0.0933 +100.2311
L fit:+0.0002 -0.1688 +183.5937
R fit:+0.0000 -0.1431 +730.7884
L best:-0.0001 +0.1712 +75.4975
R best:-0.0002 +0.2006 +583.1796
In [37]:
test_img = mpimg.imread('./test_images/test2.jpg')
_ = show_process_images(test_img, objpoints, imgpoints)
C:\Anaconda3\envs\carnd-term1\lib\site-packages\ipykernel_launcher.py:94: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
C:\Anaconda3\envs\carnd-term1\lib\site-packages\ipykernel_launcher.py:96: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
Radius:3242.6061 
Vehicle Position:-0.3624 
L diff:+0.0003 +0.4508 +125.0407
R diff:+0.0002 +0.3393 +141.0197
L fit:-0.0002 +0.2819 +58.5530
R fit:-0.0002 +0.1962 +589.7687
L best:-0.0001 +0.1934 +72.1086
R best:-0.0002 +0.1997 +584.4975
Radius:3242.6061 
Vehicle Position:-0.3624 
L diff:+0.0003 +0.4508 +125.0407
R diff:+0.0002 +0.3393 +141.0197
L fit:-0.0002 +0.2819 +58.5530
R fit:-0.0002 +0.1962 +589.7687
L best:-0.0001 +0.1934 +72.1086
R best:-0.0002 +0.1997 +584.4975
In [38]:
test_img = mpimg.imread('./test_images/test4.jpg')
_ = show_process_images(test_img, objpoints, imgpoints)
C:\Anaconda3\envs\carnd-term1\lib\site-packages\ipykernel_launcher.py:94: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
C:\Anaconda3\envs\carnd-term1\lib\site-packages\ipykernel_launcher.py:96: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
Radius:3989.7219 
Vehicle Position:-0.3996 
L diff:+0.0003 +0.3638 +114.5758
R diff:+0.0005 +0.5395 +166.3079
L fit:+0.0001 -0.0819 +173.1288
R fit:+0.0003 -0.3433 +756.0766
L best:-0.0001 +0.1712 +75.4975
R best:-0.0002 +0.2006 +583.1796
Radius:3989.7219 
Vehicle Position:-0.3996 
L diff:+0.0003 +0.3638 +114.5758
R diff:+0.0005 +0.5395 +166.3079
L fit:+0.0001 -0.0819 +173.1288
R fit:+0.0003 -0.3433 +756.0766
L best:-0.0001 +0.1712 +75.4975
R best:-0.0002 +0.2006 +583.1796

TODO: Consider the following

The approach to better this is to

Try other color spaces. For instance looking at the blue and green channels of the RGB might give you yellow pixels easily (yellow = blue + green right) Try to limit your search based on the expected lane width. This will prevent the weird effects that you see in your final image. In every detection, reinforce the weaker lane (lane with lesser pixels) with the stronger lane by assuming that the two lanes are parallel.

If there are too few pixels detected in a particular lane, just discard it and use the previous lane as the value. One another approach I took was to do a weighted average of the polynomials from previous frames to current one, based on the number of thresholded pixels. So if the current image has a lot of thresholded pixels, then I give the current polynomial a lot of importance, otherwise no. This is a generalization of point 3.

In [39]:
test_img = mpimg.imread('./issue_images/org_video_image_20171217_10_22_06.jpg')
#test_output = pipeline_thresh(test_img, s_thresh=(170, 255), sx_thresh=(5, 100))
test_lane = show_process_images(test_img, objpoints, imgpoints)#, sx_thresh=(5, 100))
C:\Anaconda3\envs\carnd-term1\lib\site-packages\ipykernel_launcher.py:94: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
C:\Anaconda3\envs\carnd-term1\lib\site-packages\ipykernel_launcher.py:96: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
Radius:1649.1730 
Vehicle Position:0.2514 
L diff:+0.0006 +0.5886 +195.4910
R diff:+0.0000 +0.1604 +165.4430
L fit:+0.0007 -0.6705 +368.6197
R fit:+0.0003 -0.5037 +590.6336
L best:+0.0001 +0.0029 +134.1219
R best:-0.0001 +0.0597 +584.6704
Radius:1649.1730 
Vehicle Position:0.2514 
L diff:+0.0006 +0.5886 +195.4910
R diff:+0.0000 +0.1604 +165.4430
L fit:+0.0007 -0.6705 +368.6197
R fit:+0.0003 -0.5037 +590.6336
L best:+0.0001 +0.0029 +134.1219
R best:-0.0001 +0.0597 +584.6704
In [40]:
_ = show_process_images(test_img, objpoints, imgpoints,test_lane)#left_fit_prev, right_fit_prev)
Radius:1708.1456 
Vehicle Position:-0.6300 
L diff:+0.0003 +0.2223 +28.4200
R diff:+0.0006 +0.7815 +45.2978
L fit:+0.0010 -0.8929 +397.0398
R fit:-0.0002 +0.2778 +545.3358
L best:-0.0001 +0.1712 +75.4975
R best:-0.0002 +0.2006 +583.1796
Radius:1708.1456 
Vehicle Position:-0.6300 
L diff:+0.0003 +0.2223 +28.4200
R diff:+0.0006 +0.7815 +45.2978
L fit:+0.0010 -0.8929 +397.0398
R fit:-0.0002 +0.2778 +545.3358
L best:-0.0001 +0.1712 +75.4975
R best:-0.0002 +0.2006 +583.1796

Thresholded binary image

  • Apply a distortion correction to raw images.
  • Use color transforms, gradients, etc., to create a thresholded binary image.

Perspective transform to rectify binary image ("birds-eye view")

  • Apply a perspective transform to rectify binary image ("birds-eye view").
  • Detect lane pixels and fit to find the lane boundary.

Lane boundaries and numerical estimation of lane curvature and vehicle position

  • Determine the curvature of the lane and vehicle position with respect to center.
  • Warp the detected lane boundaries back onto the original image.
  • Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
In [41]:
#flg_in_process = False
#left_fit = None
#right_fit = None
#count = 0

#right_line, left_line = None, None


DIR_ORG_VIDEO_SHOT = "./video_images/"
DIR_CVT_VIDEO_SHOT = "./video_images/"
PREFIX_VIDEO_FILE_NAME = "video_image_"
SUFFIX_ORG_VIDEO_FILE_NAME = "_org"
SUFFIX_GEN_VIDEO_FILE_NAME = "_gen"
FLG_FILE_OUTPUT = True

ORG_VIDEO_FILE_NAME = "org_video_image_"
CVT_VIDEO_FILE_NAME = "cvt_video_image_"

from PIL import Image
def save_image(image, dirname, filename, suffix= None):
    """save a image file"""  
    datetime_now_text = datetime.now().strftime("%Y%m%d_%H_%M_%S")
    filepath = dirname + filename + datetime.now().strftime("%Y%m%d_%H_%M_%S.jpg")
    if suffix != None:
        filepath = dirname + filename + datetime_now_text + suffix + ".jpg"
    else:
        filepath = dirname + filename + datetime_now_text + ".jpg"
    if not os.path.exists(filepath) :
        Image.fromarray(image).save(filepath)
In [42]:
lane = None
In [43]:
from datetime import datetime
import os

def video_pipeline(image): 
    import __main__
    processed_image = np.copy(image)
    
    # Undistort image 
    processed_image = cal_undistort(processed_image, objpoints, imgpoints)
    
    processed_image = pipeline_thresh(processed_image)
    binary_warped, M, Minv = pipeline_warp(processed_image)

    #if flg_in_process == False:
    if __main__.lane == None:
        __main__.lane = Lane()
        __main__.lane = find_lane(binary_warped, __main__.lane)
    else:
        __main__.lane = find_lane_again(binary_warped, __main__.lane)
    
    left_fit = __main__.lane.left_line.select_fit()#current_fit
    right_fit = __main__.lane.right_line.select_fit()#current_fit

    line_projector = LineProjector(Minv)
    processed_image = line_projector.project_lines(image, binary_warped, left_fit, right_fit)
    processed_image = put_lane_info(processed_image, lane)

    if FLG_FILE_OUTPUT == True:
        save_image(image,DIR_ORG_VIDEO_SHOT, PREFIX_VIDEO_FILE_NAME, SUFFIX_ORG_VIDEO_FILE_NAME)
        save_image(processed_image,DIR_CVT_VIDEO_SHOT, PREFIX_VIDEO_FILE_NAME, SUFFIX_GEN_VIDEO_FILE_NAME)
    
    return processed_image # This must be a color image
In [44]:
def process_image(image):
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    # TODO: put your pipeline here,
    # you should return the final output (image where lines are drawn on lanes)
    result = video_pipeline(image)
    return result
In [45]:
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
In [46]:
VIDEO_INPUT = 'challenge_video.mp4'
VIDEO_OUTPUT = 'output_images/challenge_video_output.mp4'
lane = None
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip(VIDEO_INPUT)
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(VIDEO_OUTPUT, audio=False)
#flg_in_process = False
#left_fit = None
#right_fit = None
lane = None
[MoviePy] >>>> Building video output_images/challenge_video_output.mp4
[MoviePy] Writing video output_images/challenge_video_output.mp4
100%|████████████████████████████████████████████████████████████████████████████████| 485/485 [09:37<00:00,  1.00it/s]
[MoviePy] Done.
[MoviePy] >>>> Video ready: output_images/challenge_video_output.mp4 

Wall time: 9min 39s
In [47]:
HTML("""
<video width="960" height="540" controls>
  <source src="{0}">
</video>
""".format(VIDEO_OUTPUT))
Out[47]: